if ( ivrs_mappings[alias_id].intremap_table == NULL )
{
/* allocate per-device or shared interrupt remapping table */
- ivrs_mappings[alias_id].intremap_table =
-     amd_iommu_alloc_intremap_table();
+ if ( amd_iommu_perdev_intremap )
+     ivrs_mappings[alias_id].intremap_table =
+         amd_iommu_alloc_intremap_table();
+ else
+ {
+ if ( shared_intremap_table == NULL )
+ shared_intremap_table = amd_iommu_alloc_intremap_table();
+ ivrs_mappings[alias_id].intremap_table = shared_intremap_table;
+ }
}
/* assign iommu hardware */
ivrs_mappings[bdf].iommu = iommu;
/* Tell the device to stop DMAing; we can't rely on the guest to
* control it for us. */
for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
- if ( get_requestor_id(bdf) == device_id )
+ if ( get_dma_requestor_id(bdf) == device_id )
{
cword = pci_conf_read16(PCI_BUS(bdf), PCI_SLOT(bdf),
PCI_FUNC(bdf), PCI_COMMAND);
ivrs_mappings[bdf].dte_ext_int_pass = IOMMU_CONTROL_DISABLED;
ivrs_mappings[bdf].dte_init_pass = IOMMU_CONTROL_DISABLED;
- spin_lock_init(&ivrs_mappings[bdf].intremap_lock);
+ if ( amd_iommu_perdev_intremap )
+ spin_lock_init(&ivrs_mappings[bdf].intremap_lock);
}
return 0;
}
#define INTREMAP_ENTRIES (1 << INTREMAP_LENGTH)
int ioapic_bdf[MAX_IO_APICS];
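+/*
+ * Fallback table and lock used by every device when per-device
+ * interrupt remapping (amd_iommu_perdev_intremap) is disabled;
+ * allocated lazily by the first device that needs it.
+ */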
+void *shared_intremap_table;
+static DEFINE_SPINLOCK(shared_intremap_lock);
static spinlock_t* get_intremap_lock(int req_id)
{
- return &ivrs_mappings[req_id].intremap_lock;
+ return (amd_iommu_perdev_intremap ?
+         &ivrs_mappings[req_id].intremap_lock :
+         &shared_intremap_lock);
+}
+
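+/*
+ * Return the id used to index the interrupt remapping table: the raw
+ * requestor id recorded in the IVRS mapping, i.e. the alias id for
+ * aliased devices (contrast with get_dma_requestor_id()).
+ */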
+static int get_intremap_requestor_id(int bdf)
+{
+ ASSERT( bdf < ivrs_bdf_entries );
+ return ivrs_mappings[bdf].dte_requestor_id;
}
static int get_intremap_offset(u8 vector, u8 dm)
spinlock_t *lock;
int offset;
- req_id = get_requestor_id(bdf);
+ req_id = get_intremap_requestor_id(bdf);
lock = get_intremap_lock(req_id);
delivery_mode = rte->delivery_mode;
continue;
}
- req_id = get_requestor_id(bdf);
+ req_id = get_intremap_requestor_id(bdf);
lock = get_intremap_lock(req_id);
delivery_mode = rte.delivery_mode;
{
unsigned long flags;
u32* entry;
- u16 bdf, req_id;
+ u16 bdf, req_id, alias_id;
u8 delivery_mode, dest, vector, dest_mode;
spinlock_t *lock;
int offset;
bdf = (pdev->bus << 8) | pdev->devfn;
- req_id = get_requestor_id(bdf);
+ req_id = get_dma_requestor_id(bdf);
+ alias_id = get_intremap_requestor_id(bdf);
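+ /*
+ * The primary remapping entry is indexed by the DMA requestor id;
+ * a secondary entry under alias_id is maintained when the two differ.
+ */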
if ( msg == NULL )
{
free_intremap_entry(req_id, msi_desc->remap_index);
spin_unlock_irqrestore(lock, flags);
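+ /* Also drop the secondary entry kept under the alias id, if any. */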
+ if ( (req_id != alias_id) &&
+      ivrs_mappings[alias_id].intremap_table != NULL )
+ {
+ lock = get_intremap_lock(alias_id);
+ spin_lock_irqsave(lock, flags);
+ free_intremap_entry(alias_id, msi_desc->remap_index);
+ spin_unlock_irqrestore(lock, flags);
+ }
goto done;
}
update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
spin_unlock_irqrestore(lock, flags);
+ /*
+ * In some special cases, a PCIe device (e.g. a SATA controller in
+ * IDE mode) uses its alias id to index the interrupt remapping
+ * table. Set up a secondary interrupt remapping entry to satisfy
+ * such devices.
+ */
+
+ lock = get_intremap_lock(alias_id);
+ if ( (req_id != alias_id) &&
+      ivrs_mappings[alias_id].intremap_table != NULL )
+ {
+ spin_lock_irqsave(lock, flags);
+ entry = (u32*)get_intremap_entry(alias_id, offset);
+ update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
+ spin_unlock_irqrestore(lock, flags);
+ }
+
done:
if ( iommu->enabled )
{
spin_lock_irqsave(&iommu->lock, flags);
invalidate_interrupt_table(iommu, req_id);
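+ /* The alias id indexes its own table entry; flush that one too. */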
+ if ( alias_id != req_id )
+ invalidate_interrupt_table(iommu, alias_id);
flush_command_buffer(iommu);
spin_unlock_irqrestore(&iommu->lock, flags);
}
for_each_pdev( d, pdev )
{
bdf = (pdev->bus << 8) | pdev->devfn;
- req_id = get_requestor_id(bdf);
+ req_id = get_dma_requestor_id(bdf);
iommu = find_iommu_for_device(bdf);
if ( !iommu )
{
return ivrs_mappings[bdf].iommu;
}
-int get_requestor_id(u16 bdf)
+/*
+ * Some devices use their alias id to index the interrupt remapping
+ * table and their original device id to index the I/O page table.
+ * Such devices have both an alias entry and a select entry in the
+ * IVRS structure.
+ *
+ * Return the original device id if the device has a valid interrupt
+ * remapping table set up for both the select entry and the alias entry.
+ */
+int get_dma_requestor_id(u16 bdf)
{
+ int req_id;
+
BUG_ON ( bdf >= ivrs_bdf_entries );
- return ivrs_mappings[bdf].dte_requestor_id;
+ req_id = ivrs_mappings[bdf].dte_requestor_id;
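+ /* With tables set up for both the select and the alias entry,
+ * DMA requests are indexed by the original device id. */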
+ if ( (ivrs_mappings[bdf].intremap_table != NULL) &&
+ (ivrs_mappings[req_id].intremap_table != NULL) )
+ req_id = bdf;
+
+ return req_id;
}
static int is_translation_valid(u32 *entry)
valid = 0;
/* get device-table entry */
- req_id = get_requestor_id(bdf);
+ req_id = get_dma_requestor_id(bdf);
dte = iommu->dev_table.buffer + (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
spin_lock_irqsave(&iommu->lock, flags);
int req_id;
BUG_ON ( iommu->dev_table.buffer == NULL );
- req_id = get_requestor_id(bdf);
+ req_id = get_dma_requestor_id(bdf);
dte = iommu->dev_table.buffer + (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
spin_lock_irqsave(&iommu->lock, flags);
static int amd_iommu_assign_device(struct domain *d, u8 bus, u8 devfn)
{
int bdf = (bus << 8) | devfn;
- int req_id = get_requestor_id(bdf);
+ int req_id = get_dma_requestor_id(bdf);
if ( ivrs_mappings[req_id].unity_map_enable )
{
int rt;
int bdf = (bus << 8) | devfn;
rt = ( bdf < ivrs_bdf_entries ) ?
- get_requestor_id(bdf) :
+ get_dma_requestor_id(bdf) :
bdf;
return rt;
}
bool_t __read_mostly iommu_hap_pt_share;
bool_t __read_mostly iommu_debug;
bool_t __read_mostly iommu_amd_perdev_vector_map = 1;
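+/* Off by default; enabled with the "amd-iommu-perdev-intremap"
+ * command line option parsed below. */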
+bool_t __read_mostly amd_iommu_perdev_intremap;
static void __init parse_iommu_param(char *s)
{
iommu_intremap = 0;
else if ( !strcmp(s, "debug") )
iommu_debug = 1;
+ else if ( !strcmp(s, "amd-iommu-perdev-intremap") )
+ amd_iommu_perdev_intremap = 1;
else if ( !strcmp(s, "dom0-passthrough") )
iommu_passthrough = 1;
else if ( !strcmp(s, "dom0-strict") )
void amd_iommu_share_p2m(struct domain *d);
/* device table functions */
-int get_requestor_id(u16 bdf);
+int get_dma_requestor_id(u16 bdf);
void amd_iommu_add_dev_table_entry(
u32 *dte, u8 sys_mgt, u8 dev_ex, u8 lint1_pass, u8 lint0_pass,
u8 nmi_pass, u8 ext_int_pass, u8 init_pass);
unsigned int apic, unsigned int reg);
extern int ioapic_bdf[MAX_IO_APICS];
+extern void *shared_intremap_table;
/* power management support */
void amd_iommu_resume(void);
extern bool_t iommu_snoop, iommu_qinval, iommu_intremap;
extern bool_t iommu_hap_pt_share;
extern bool_t iommu_debug;
+extern bool_t amd_iommu_perdev_intremap;
extern struct rangeset *mmio_ro_ranges;